resize image¶

In [ ]:
import os
import cv2
os.chdir("D:/Dataset/cancer")
img =cv2.imread('D:/Dataset/cancer/1.jpg')
In [ ]:
import os
import cv2
os.chdir("D:/Dataset/Non-Cancer")
img =cv2.imread('D:/Dataset/Non-Cancer/1.jpg')
In [ ]:
cv2.imshow('img',img)
cv2.waitKey(6000)
cv2.destroyAllWindows()
In [ ]:
resize=cv2.resize(img,(256,256),interpolation=cv2.INTER_AREA)
In [ ]:
cv2.imshow('res',resize)
cv2.waitKey(2000)
cv2.destroyAllWindows()
In [ ]:
cv2.imwrite("img.jpg",resize)
Out[ ]:
True
In [ ]:
for j in range(1,8):   
    for i in range(0,50):
        im=cv2.imread("D:/Dataset/cancer/"+str(j)+".jpg")
        resize=cv2.resize(im,(1028,1028),interpolation=cv2.INTER_AREA)
        cv2.imwrite("D:/Dataset/cancer/"+str(j)+str(i)+".jpg",resize)
In [ ]:
for j in range(1,6):   
    for i in range(0,50):
        im=cv2.imread('D:/Dataset/Non-Cancer/'+str(j)+'.jpg')
        resize=cv2.resize(im,(1028,1028),interpolation=cv2.INTER_AREA)
        cv2.imwrite("D:/Dataset/Non-Cancer/"+str(j)+str(i)+".jpg",resize)
In [ ]:
for i in range(1,8):
    n=str(i)
    img=cv2.imread('D:/dataset/cancer/'+n+'.jpg')
    resize=cv2.resize(img,(1028,1028),interpolation=cv2.INTER_AREA)
    cv2.imwrite("D:/Dataset/Resized cancer/"+n+".jpg",resize)
In [ ]:
for i in range(1,6):
    n=str(i)
    img=cv2.imread('D:/dataset/Non-Cancer/'+n+'.jpg')
    resize=cv2.resize(img,(1028,1028),interpolation=cv2.INTER_AREA)
    cv2.imwrite("D:/Dataset/Resized Non-Cancer/"+n+".jpg",resize)

Image Data Generation¶

In [ ]:
from tensorflow.keras.utils import array_to_img, img_to_array, load_img
In [ ]:
from keras.preprocessing.image import ImageDataGenerator
In [ ]:
datagen = ImageDataGenerator(rotation_range=40,width_shift_range=0.1,
                            height_shift_range=0.1,shear_range=0.2,
                            zoom_range=0.2,horizontal_flip=False,
                            fill_mode='nearest')
In [ ]:
def img_gene(img):
    img = load_img(img)
    x = img_to_array(img)
    x = x.reshape((1,) + x.shape)
    i = 0
    for batch in datagen.flow(x, batch_size=1,save_to_dir='D:/Dataset/cancer',
                              save_prefix='10', save_format='jpg'):
        i+= 1
        if i > 20:
            break
In [ ]:
for p in range(1,7):
    num=str(p)
    img="D:/Dataset/Resized cancer/"+num+".jpg"
    img_gene(img)
In [ ]:
import os
# assign directory
directory = "D:/Dataset/Resized Non-Cancer/"
 
# iterate over files in
# that directory
for c in os.listdir(directory):
    f = os.path.join(directory, c)
    if os.path.isfile(f):
        #img="D:/Dataset/Resized cancer/"+num+".jpg"
        img_gene(f)
In [ ]:
import numpy as np
import pandas as pd 
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers
from time import perf_counter 
import os
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from tensorflow.keras.utils import plot_model
In [ ]:
## Defining batch specfications
batch_size = 100
img_height = 250
img_width = 250
In [ ]:
## loading training set
training_data = tf.keras.preprocessing.image_dataset_from_directory(
    'D:/Dataset/Train',
    seed=42,
    image_size= (img_height, img_width),
    batch_size=batch_size,
    color_mode='rgb'
)
Found 231 files belonging to 2 classes.
In [ ]:
## loading validation dataset
validation_data =  tf.keras.preprocessing.image_dataset_from_directory(
    'D:/Dataset/Val',
    seed=42,
    image_size= (img_height, img_width),
    batch_size=batch_size,
    color_mode='rgb'
)
Found 231 files belonging to 2 classes.
In [ ]:
## loading testing dataset
testing_data = tf.keras.preprocessing.image_dataset_from_directory(
    'D:/Dataset/test',
    seed=42,
    image_size= (img_height, img_width),
    batch_size=batch_size,
    color_mode='rgb'
)
Found 15 files belonging to 2 classes.
In [ ]:
testing_data
Out[ ]:
<_BatchDataset element_spec=(TensorSpec(shape=(None, 250, 250, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None,), dtype=tf.int32, name=None))>
In [ ]:
class_names = training_data.class_names
class_names
Out[ ]:
['Training Non-Cancer', 'Training cancer']
In [ ]:
## Configuring dataset for performance
AUTOTUNE = tf.data.experimental.AUTOTUNE
training_data = training_data.cache().prefetch(buffer_size=AUTOTUNE)
testing_data = testing_data.cache().prefetch(buffer_size=AUTOTUNE)
In [ ]:
## Defining Cnn
model = tf.keras.models.Sequential([
  layers.BatchNormalization(),
  layers.Conv2D(32, 3, activation='relu'), # Conv2D(f_size, filter_size, activation) # relu, sigmoid, softmax
  layers.MaxPooling2D(), # MaxPooling
  layers.Conv2D(64, 3, activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(128, 3, activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(256, 3, activation='relu'),
  layers.MaxPooling2D(),
  layers.Flatten(),
  layers.Dense(512, activation='relu'),
  layers.Dense(len(class_names), activation= 'softmax')
])

model.compile(optimizer='adam',loss='sparse_categorical_crossentropy', metrics=['accuracy'])
In [ ]:
model.build((None, 250, 250, 3))
model.summary()
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 batch_normalization (BatchN  (None, 250, 250, 3)      12        
 ormalization)                                                   
                                                                 
 conv2d (Conv2D)             (None, 248, 248, 32)      896       
                                                                 
 max_pooling2d (MaxPooling2D  (None, 124, 124, 32)     0         
 )                                                               
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 batch_normalization (BatchN  (None, 250, 250, 3)      12        
 ormalization)                                                   
                                                                 
 conv2d (Conv2D)             (None, 248, 248, 32)      896       
                                                                 
 max_pooling2d (MaxPooling2D  (None, 124, 124, 32)     0         
 )                                                               
                                                                 
 conv2d_1 (Conv2D)           (None, 122, 122, 64)      18496     
                                                                 
 max_pooling2d_1 (MaxPooling  (None, 61, 61, 64)       0         
 2D)                                                             
                                                                 
 conv2d_2 (Conv2D)           (None, 59, 59, 128)       73856     
                                                                 
 max_pooling2d_2 (MaxPooling  (None, 29, 29, 128)      0         
 2D)                                                             
                                                                 
 conv2d_3 (Conv2D)           (None, 27, 27, 256)       295168    
                                                                 
 max_pooling2d_3 (MaxPooling  (None, 13, 13, 256)      0         
 2D)                                                             
                                                                 
 flatten (Flatten)           (None, 43264)             0         
                                                                 
 dense (Dense)               (None, 512)               22151680  
                                                                 
 dense_1 (Dense)             (None, 2)                 1026      
                                                                 
=================================================================
Total params: 22,541,134
Trainable params: 22,541,128
Non-trainable params: 6
_________________________________________________________________
In [ ]:
## lets train our CNN
checkpoint = ModelCheckpoint("model_weights.h5", monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
history = model.fit(training_data, validation_data=validation_data, epochs = 50, callbacks=callbacks_list)
Epoch 1/50
3/3 [==============================] - ETA: 0s - loss: 9.1437e-06 - accuracy: 1.0000
Epoch 1: val_accuracy improved from -inf to 0.54545, saving model to model_weights.h5
3/3 [==============================] - 30s 9s/step - loss: 9.1437e-06 - accuracy: 1.0000 - val_loss: 11.8699 - val_accuracy: 0.5455
Epoch 2/50
3/3 [==============================] - ETA: 0s - loss: 7.9455e-06 - accuracy: 1.0000
Epoch 2: val_accuracy did not improve from 0.54545
3/3 [==============================] - 20s 6s/step - loss: 7.9455e-06 - accuracy: 1.0000 - val_loss: 11.3084 - val_accuracy: 0.5455
Epoch 3/50
3/3 [==============================] - ETA: 0s - loss: 7.1059e-06 - accuracy: 1.0000
Epoch 3: val_accuracy did not improve from 0.54545
3/3 [==============================] - 20s 7s/step - loss: 7.1059e-06 - accuracy: 1.0000 - val_loss: 10.7544 - val_accuracy: 0.5455
Epoch 4/50
3/3 [==============================] - ETA: 0s - loss: 6.4954e-06 - accuracy: 1.0000
Epoch 4: val_accuracy did not improve from 0.54545
3/3 [==============================] - 16s 5s/step - loss: 6.4954e-06 - accuracy: 1.0000 - val_loss: 10.2117 - val_accuracy: 0.5455
Epoch 5/50
3/3 [==============================] - ETA: 0s - loss: 6.0331e-06 - accuracy: 1.0000
Epoch 5: val_accuracy did not improve from 0.54545
3/3 [==============================] - 16s 5s/step - loss: 6.0331e-06 - accuracy: 1.0000 - val_loss: 9.6825 - val_accuracy: 0.5455
Epoch 6/50
3/3 [==============================] - ETA: 0s - loss: 5.6755e-06 - accuracy: 1.0000
Epoch 6: val_accuracy did not improve from 0.54545
3/3 [==============================] - 16s 5s/step - loss: 5.6755e-06 - accuracy: 1.0000 - val_loss: 9.1676 - val_accuracy: 0.5455
Epoch 7/50
3/3 [==============================] - ETA: 0s - loss: 5.3917e-06 - accuracy: 1.0000
Epoch 7: val_accuracy did not improve from 0.54545
3/3 [==============================] - 17s 5s/step - loss: 5.3917e-06 - accuracy: 1.0000 - val_loss: 8.6670 - val_accuracy: 0.5455
Epoch 8/50
3/3 [==============================] - ETA: 0s - loss: 5.1636e-06 - accuracy: 1.0000
Epoch 8: val_accuracy did not improve from 0.54545
3/3 [==============================] - 16s 5s/step - loss: 5.1636e-06 - accuracy: 1.0000 - val_loss: 8.1808 - val_accuracy: 0.5455
Epoch 9/50
3/3 [==============================] - ETA: 0s - loss: 4.9747e-06 - accuracy: 1.0000
Epoch 9: val_accuracy did not improve from 0.54545
3/3 [==============================] - 17s 5s/step - loss: 4.9747e-06 - accuracy: 1.0000 - val_loss: 7.7089 - val_accuracy: 0.5455
Epoch 10/50
3/3 [==============================] - ETA: 0s - loss: 4.8173e-06 - accuracy: 1.0000
Epoch 10: val_accuracy did not improve from 0.54545
3/3 [==============================] - 17s 5s/step - loss: 4.8173e-06 - accuracy: 1.0000 - val_loss: 7.2512 - val_accuracy: 0.5455
Epoch 11/50
3/3 [==============================] - ETA: 0s - loss: 4.6873e-06 - accuracy: 1.0000
Epoch 11: val_accuracy did not improve from 0.54545
3/3 [==============================] - 16s 5s/step - loss: 4.6873e-06 - accuracy: 1.0000 - val_loss: 6.8084 - val_accuracy: 0.5455
Epoch 12/50
3/3 [==============================] - ETA: 0s - loss: 4.5712e-06 - accuracy: 1.0000
Epoch 12: val_accuracy improved from 0.54545 to 0.54978, saving model to model_weights.h5
3/3 [==============================] - 18s 6s/step - loss: 4.5712e-06 - accuracy: 1.0000 - val_loss: 6.3811 - val_accuracy: 0.5498
Epoch 13/50
3/3 [==============================] - ETA: 0s - loss: 4.4721e-06 - accuracy: 1.0000
Epoch 13: val_accuracy improved from 0.54978 to 0.55411, saving model to model_weights.h5
3/3 [==============================] - 18s 6s/step - loss: 4.4721e-06 - accuracy: 1.0000 - val_loss: 5.9703 - val_accuracy: 0.5541
Epoch 14/50
3/3 [==============================] - ETA: 0s - loss: 4.3807e-06 - accuracy: 1.0000
Epoch 14: val_accuracy improved from 0.55411 to 0.55844, saving model to model_weights.h5
3/3 [==============================] - 23s 8s/step - loss: 4.3807e-06 - accuracy: 1.0000 - val_loss: 5.5773 - val_accuracy: 0.5584
Epoch 15/50
3/3 [==============================] - ETA: 0s - loss: 4.2997e-06 - accuracy: 1.0000
Epoch 15: val_accuracy improved from 0.55844 to 0.58874, saving model to model_weights.h5
3/3 [==============================] - 26s 9s/step - loss: 4.2997e-06 - accuracy: 1.0000 - val_loss: 5.2029 - val_accuracy: 0.5887
Epoch 16/50
3/3 [==============================] - ETA: 0s - loss: 4.2280e-06 - accuracy: 1.0000
Epoch 16: val_accuracy improved from 0.58874 to 0.60606, saving model to model_weights.h5
3/3 [==============================] - 24s 9s/step - loss: 4.2280e-06 - accuracy: 1.0000 - val_loss: 4.8477 - val_accuracy: 0.6061
Epoch 17/50
3/3 [==============================] - ETA: 0s - loss: 4.1573e-06 - accuracy: 1.0000
Epoch 17: val_accuracy improved from 0.60606 to 0.61472, saving model to model_weights.h5
3/3 [==============================] - 20s 7s/step - loss: 4.1573e-06 - accuracy: 1.0000 - val_loss: 4.5111 - val_accuracy: 0.6147
Epoch 18/50
3/3 [==============================] - ETA: 0s - loss: 4.0918e-06 - accuracy: 1.0000
Epoch 18: val_accuracy improved from 0.61472 to 0.62771, saving model to model_weights.h5
3/3 [==============================] - 20s 7s/step - loss: 4.0918e-06 - accuracy: 1.0000 - val_loss: 4.1920 - val_accuracy: 0.6277
Epoch 19/50
3/3 [==============================] - ETA: 0s - loss: 4.0319e-06 - accuracy: 1.0000
Epoch 19: val_accuracy improved from 0.62771 to 0.63636, saving model to model_weights.h5
3/3 [==============================] - 19s 6s/step - loss: 4.0319e-06 - accuracy: 1.0000 - val_loss: 3.8899 - val_accuracy: 0.6364
Epoch 20/50
3/3 [==============================] - ETA: 0s - loss: 3.9725e-06 - accuracy: 1.0000
Epoch 20: val_accuracy improved from 0.63636 to 0.64502, saving model to model_weights.h5
3/3 [==============================] - 22s 8s/step - loss: 3.9725e-06 - accuracy: 1.0000 - val_loss: 3.6042 - val_accuracy: 0.6450
Epoch 21/50
3/3 [==============================] - ETA: 0s - loss: 3.9148e-06 - accuracy: 1.0000
Epoch 21: val_accuracy improved from 0.64502 to 0.66234, saving model to model_weights.h5
3/3 [==============================] - 21s 8s/step - loss: 3.9148e-06 - accuracy: 1.0000 - val_loss: 3.3344 - val_accuracy: 0.6623
Epoch 22/50
3/3 [==============================] - ETA: 0s - loss: 3.8580e-06 - accuracy: 1.0000
Epoch 22: val_accuracy improved from 0.66234 to 0.68398, saving model to model_weights.h5
3/3 [==============================] - 21s 7s/step - loss: 3.8580e-06 - accuracy: 1.0000 - val_loss: 3.0798 - val_accuracy: 0.6840
Epoch 23/50
3/3 [==============================] - ETA: 0s - loss: 3.8038e-06 - accuracy: 1.0000
Epoch 23: val_accuracy improved from 0.68398 to 0.68831, saving model to model_weights.h5
3/3 [==============================] - 19s 7s/step - loss: 3.8038e-06 - accuracy: 1.0000 - val_loss: 2.8400 - val_accuracy: 0.6883
Epoch 24/50
3/3 [==============================] - ETA: 0s - loss: 3.7491e-06 - accuracy: 1.0000
Epoch 24: val_accuracy improved from 0.68831 to 0.69697, saving model to model_weights.h5
3/3 [==============================] - 19s 6s/step - loss: 3.7491e-06 - accuracy: 1.0000 - val_loss: 2.6145 - val_accuracy: 0.6970
Epoch 25/50
3/3 [==============================] - ETA: 0s - loss: 3.6954e-06 - accuracy: 1.0000
Epoch 25: val_accuracy improved from 0.69697 to 0.72294, saving model to model_weights.h5
3/3 [==============================] - 19s 6s/step - loss: 3.6954e-06 - accuracy: 1.0000 - val_loss: 2.4039 - val_accuracy: 0.7229
Epoch 26/50
3/3 [==============================] - ETA: 0s - loss: 3.6371e-06 - accuracy: 1.0000
Epoch 26: val_accuracy improved from 0.72294 to 0.73593, saving model to model_weights.h5
3/3 [==============================] - 19s 7s/step - loss: 3.6371e-06 - accuracy: 1.0000 - val_loss: 2.2081 - val_accuracy: 0.7359
Epoch 27/50
3/3 [==============================] - ETA: 0s - loss: 3.5809e-06 - accuracy: 1.0000
Epoch 27: val_accuracy improved from 0.73593 to 0.75325, saving model to model_weights.h5
3/3 [==============================] - 21s 8s/step - loss: 3.5809e-06 - accuracy: 1.0000 - val_loss: 2.0268 - val_accuracy: 0.7532
Epoch 28/50
3/3 [==============================] - ETA: 0s - loss: 3.5184e-06 - accuracy: 1.0000
Epoch 28: val_accuracy did not improve from 0.75325
3/3 [==============================] - 17s 5s/step - loss: 3.5184e-06 - accuracy: 1.0000 - val_loss: 1.8581 - val_accuracy: 0.7532
Epoch 29/50
3/3 [==============================] - ETA: 0s - loss: 3.4550e-06 - accuracy: 1.0000
Epoch 29: val_accuracy improved from 0.75325 to 0.77056, saving model to model_weights.h5
3/3 [==============================] - 19s 7s/step - loss: 3.4550e-06 - accuracy: 1.0000 - val_loss: 1.7008 - val_accuracy: 0.7706
Epoch 30/50
3/3 [==============================] - ETA: 0s - loss: 3.3837e-06 - accuracy: 1.0000
Epoch 30: val_accuracy improved from 0.77056 to 0.78355, saving model to model_weights.h5
3/3 [==============================] - 20s 7s/step - loss: 3.3837e-06 - accuracy: 1.0000 - val_loss: 1.5542 - val_accuracy: 0.7835
Epoch 31/50
3/3 [==============================] - ETA: 0s - loss: 3.3053e-06 - accuracy: 1.0000
Epoch 31: val_accuracy improved from 0.78355 to 0.79654, saving model to model_weights.h5
3/3 [==============================] - 21s 7s/step - loss: 3.3053e-06 - accuracy: 1.0000 - val_loss: 1.4176 - val_accuracy: 0.7965
Epoch 32/50
3/3 [==============================] - ETA: 0s - loss: 3.2217e-06 - accuracy: 1.0000
Epoch 32: val_accuracy improved from 0.79654 to 0.80519, saving model to model_weights.h5
3/3 [==============================] - 21s 7s/step - loss: 3.2217e-06 - accuracy: 1.0000 - val_loss: 1.2901 - val_accuracy: 0.8052
Epoch 33/50
3/3 [==============================] - ETA: 0s - loss: 3.1216e-06 - accuracy: 1.0000
Epoch 33: val_accuracy improved from 0.80519 to 0.80952, saving model to model_weights.h5
3/3 [==============================] - 19s 6s/step - loss: 3.1216e-06 - accuracy: 1.0000 - val_loss: 1.1712 - val_accuracy: 0.8095
Epoch 34/50
3/3 [==============================] - ETA: 0s - loss: 3.0184e-06 - accuracy: 1.0000
Epoch 34: val_accuracy improved from 0.80952 to 0.81385, saving model to model_weights.h5
3/3 [==============================] - 19s 7s/step - loss: 3.0184e-06 - accuracy: 1.0000 - val_loss: 1.0598 - val_accuracy: 0.8139
Epoch 35/50
3/3 [==============================] - ETA: 0s - loss: 2.9054e-06 - accuracy: 1.0000
Epoch 35: val_accuracy improved from 0.81385 to 0.82684, saving model to model_weights.h5
3/3 [==============================] - 20s 7s/step - loss: 2.9054e-06 - accuracy: 1.0000 - val_loss: 0.9559 - val_accuracy: 0.8268
Epoch 36/50
3/3 [==============================] - ETA: 0s - loss: 2.7851e-06 - accuracy: 1.0000
Epoch 36: val_accuracy improved from 0.82684 to 0.83550, saving model to model_weights.h5
3/3 [==============================] - 22s 8s/step - loss: 2.7851e-06 - accuracy: 1.0000 - val_loss: 0.8591 - val_accuracy: 0.8355
Epoch 37/50
3/3 [==============================] - ETA: 0s - loss: 2.6654e-06 - accuracy: 1.0000
Epoch 37: val_accuracy improved from 0.83550 to 0.84416, saving model to model_weights.h5
3/3 [==============================] - 19s 7s/step - loss: 2.6654e-06 - accuracy: 1.0000 - val_loss: 0.7692 - val_accuracy: 0.8442
Epoch 38/50
3/3 [==============================] - ETA: 0s - loss: 2.5390e-06 - accuracy: 1.0000
Epoch 38: val_accuracy improved from 0.84416 to 0.85281, saving model to model_weights.h5
3/3 [==============================] - 19s 6s/step - loss: 2.5390e-06 - accuracy: 1.0000 - val_loss: 0.6860 - val_accuracy: 0.8528
Epoch 39/50
3/3 [==============================] - ETA: 0s - loss: 2.4177e-06 - accuracy: 1.0000
Epoch 39: val_accuracy improved from 0.85281 to 0.86580, saving model to model_weights.h5
3/3 [==============================] - 18s 6s/step - loss: 2.4177e-06 - accuracy: 1.0000 - val_loss: 0.6095 - val_accuracy: 0.8658
Epoch 40/50
3/3 [==============================] - ETA: 0s - loss: 2.2944e-06 - accuracy: 1.0000
Epoch 40: val_accuracy improved from 0.86580 to 0.87446, saving model to model_weights.h5
3/3 [==============================] - 18s 6s/step - loss: 2.2944e-06 - accuracy: 1.0000 - val_loss: 0.5396 - val_accuracy: 0.8745
Epoch 41/50
3/3 [==============================] - ETA: 0s - loss: 2.1757e-06 - accuracy: 1.0000
Epoch 41: val_accuracy improved from 0.87446 to 0.88312, saving model to model_weights.h5
3/3 [==============================] - 18s 6s/step - loss: 2.1757e-06 - accuracy: 1.0000 - val_loss: 0.4758 - val_accuracy: 0.8831
Epoch 42/50
3/3 [==============================] - ETA: 0s - loss: 2.0637e-06 - accuracy: 1.0000
Epoch 42: val_accuracy did not improve from 0.88312
3/3 [==============================] - 16s 5s/step - loss: 2.0637e-06 - accuracy: 1.0000 - val_loss: 0.4176 - val_accuracy: 0.8831
Epoch 43/50
3/3 [==============================] - ETA: 0s - loss: 1.9512e-06 - accuracy: 1.0000
Epoch 43: val_accuracy did not improve from 0.88312
3/3 [==============================] - 16s 5s/step - loss: 1.9512e-06 - accuracy: 1.0000 - val_loss: 0.3643 - val_accuracy: 0.8831
Epoch 44/50
3/3 [==============================] - ETA: 0s - loss: 1.8500e-06 - accuracy: 1.0000
Epoch 44: val_accuracy improved from 0.88312 to 0.90043, saving model to model_weights.h5
3/3 [==============================] - 17s 6s/step - loss: 1.8500e-06 - accuracy: 1.0000 - val_loss: 0.3157 - val_accuracy: 0.9004
Epoch 45/50
3/3 [==============================] - ETA: 0s - loss: 1.7499e-06 - accuracy: 1.0000
Epoch 45: val_accuracy improved from 0.90043 to 0.90909, saving model to model_weights.h5
3/3 [==============================] - 18s 6s/step - loss: 1.7499e-06 - accuracy: 1.0000 - val_loss: 0.2717 - val_accuracy: 0.9091
Epoch 46/50
3/3 [==============================] - ETA: 0s - loss: 1.6560e-06 - accuracy: 1.0000
Epoch 46: val_accuracy improved from 0.90909 to 0.92641, saving model to model_weights.h5
3/3 [==============================] - 18s 6s/step - loss: 1.6560e-06 - accuracy: 1.0000 - val_loss: 0.2321 - val_accuracy: 0.9264
Epoch 47/50
3/3 [==============================] - ETA: 0s - loss: 1.5662e-06 - accuracy: 1.0000
Epoch 47: val_accuracy improved from 0.92641 to 0.93939, saving model to model_weights.h5
3/3 [==============================] - 18s 6s/step - loss: 1.5662e-06 - accuracy: 1.0000 - val_loss: 0.1970 - val_accuracy: 0.9394
Epoch 48/50
3/3 [==============================] - ETA: 0s - loss: 1.4878e-06 - accuracy: 1.0000
Epoch 48: val_accuracy improved from 0.93939 to 0.95238, saving model to model_weights.h5
3/3 [==============================] - 17s 6s/step - loss: 1.4878e-06 - accuracy: 1.0000 - val_loss: 0.1663 - val_accuracy: 0.9524
Epoch 49/50
3/3 [==============================] - ETA: 0s - loss: 1.4119e-06 - accuracy: 1.0000
Epoch 49: val_accuracy improved from 0.95238 to 0.95671, saving model to model_weights.h5
3/3 [==============================] - 19s 6s/step - loss: 1.4119e-06 - accuracy: 1.0000 - val_loss: 0.1397 - val_accuracy: 0.9567
Epoch 50/50
3/3 [==============================] - ETA: 0s - loss: 1.3453e-06 - accuracy: 1.0000
Epoch 50: val_accuracy improved from 0.95671 to 0.96537, saving model to model_weights.h5
3/3 [==============================] - 18s 6s/step - loss: 1.3453e-06 - accuracy: 1.0000 - val_loss: 0.1171 - val_accuracy: 0.9654
In [ ]:
###### serialize model structure to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
In [ ]:
## stats on training data
plt.plot(history.history['loss'], label = 'training loss')
plt.plot(history.history['accuracy'], label = 'training accuracy')
plt.grid(True)
plt.legend()
Out[ ]:
<matplotlib.legend.Legend at 0x203c6226710>
In [ ]:
## stats on training data
plt.plot(history.history['val_loss'], label = 'validation loss')
plt.plot(history.history['val_accuracy'], label = 'validation accuracy')
plt.grid(True)
plt.legend()
Out[ ]:
<matplotlib.legend.Legend at 0x20393822810>
In [ ]:
## lets vizualize results on testing data
AccuracyVector = []
plt.figure(figsize=(30, 30))
for images, labels in testing_data.take(1):
    predictions = model.predict(images)
    predlabel = []
    prdlbl = []
    
    for mem in predictions:
        predlabel.append(class_names[np.argmax(mem)])
        prdlbl.append(np.argmax(mem))
    
    AccuracyVector = np.array(prdlbl) == labels
    for i in range(40):
        ax = plt.subplot(10, 4, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title('Pred: '+ predlabel[i]+' actl:'+class_names[labels[i]] )
        plt.axis('off')
        plt.grid(True)
1/1 [==============================] - 0s 313ms/step
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
Cell In[45], line 16
     14 for i in range(40):
     15     ax = plt.subplot(10, 4, i + 1)
---> 16     plt.imshow(images[i].numpy().astype("uint8"))
     17     plt.title('Pred: '+ predlabel[i]+' actl:'+class_names[labels[i]] )
     18     plt.axis('off')

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\util\traceback_utils.py:153, in filter_traceback.<locals>.error_handler(*args, **kwargs)
    151 except Exception as e:
    152   filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153   raise e.with_traceback(filtered_tb) from None
    154 finally:
    155   del filtered_tb

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\framework\ops.py:7262, in raise_from_not_ok_status(e, name)
   7260 def raise_from_not_ok_status(e, name):
   7261   e.message += (" name: " + name if name is not None else "")
-> 7262   raise core._status_to_exception(e) from None

InvalidArgumentError: {{function_node __wrapped__StridedSlice_device_/job:localhost/replica:0/task:0/device:CPU:0}} slice index 15 of dimension 0 out of bounds. [Op:StridedSlice] name: strided_slice/
In [ ]:
from keras.models import model_from_json
import numpy as np

class cancerDetectionModel(object):

    class_nums = ['Training cancer', "Training Non-Cancer"]

    def __init__(self, model_json_file, model_weights_file):
        # load model from JSON file
        with open(model_json_file, "r") as json_file:
            loaded_model_json = json_file.read()
            self.loaded_model = model_from_json(loaded_model_json)

        # load weights into the new model
        self.loaded_model.load_weights(model_weights_file)
        self.loaded_model.make_predict_function()

    def predict_cancer(self, img):
        self.preds = self.loaded_model.predict(img)
        return cancerDetectionModel.class_nums[np.argmax(self.preds)], self.preds
In [ ]:
full_train_df = pd.read_csv("D:/Dataset/converted_keras/labels.txt")
full_train_df.head()
Out[ ]:
0 cancer
0 1 Non-Cancer
In [ ]:
print("Train Size: {}".format(len(os.listdir('D:\Dataset\Train\Training cancer'))))
print("Test Size: {}".format(len(os.listdir('D:\Dataset\Train\Training Non-Cancer'))))
Train Size: 126
Test Size: 105
In [ ]:
import os
from PIL import Image
from matplotlib import pyplot as plt
In [ ]:
root1 = 'D:\Dataset\Train\Training cancer'

fnames = os.listdir(root1)
In [ ]:
len(fnames)
Out[ ]:
105
In [ ]:
root2 = 'D:\Dataset\Train\Training Non-Cancer'

fnames = os.listdir(root2)
In [ ]:
len(fnames)
Out[ ]:
105
In [ ]:
fig , axs = plt.subplots(nrows=2,ncols=5,figsize=(10,10))
axs = axs.flatten()
for i in range(10):
    filepath = os.path.join(root1,root2,fnames[i])
    img = Image.open(filepath)
    axs[i].imshow(img)
    axs[i].axis('off')
    axs[i].set_title(fnames[i])
plt.show()
In [ ]:
##### For CSV FORMATTTT
# from PIL import Image
# import numpy as np
# import sys
# import os
# import csv 

# def createFileList(myDir, format='jpg'):
#     fileList = []
#     print(myDir)
#     for root, dirs, files in os.walk(myDir, topdown = False):
#         for name in files:
#             if name.endswith(format):
#                 fullName = os.path.join(root, name)
#                 fileList.append(fullName)
#     return fileList
# myFileList = createFileList('D:\Dataset\Train\Training cancer')

# for file in myFileList:
#     print(file)
#     img_file = Image.open(file)
#     width, height = img_file.size
#     format = img_file.format
#     mode = img_file.mode
#     img_grey = img_file.convert('L')

#     value = np.asarray(img_grey.getdata(), dtype=np.int).reshape((img_grey.size[1], img_grey.size[0]))
#     value = value.flatten()
#     print(value)
#     with open("image_to_csv.csv", 'a') as f:
#         writer = csv.writer(f)
#         writer.writerow(value)
In [ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
In [ ]:
import os 
path = os.listdir('D:/Dataset/Train')
classes = {'Training cancer': 0, 'Training Non-Cancer': 1}
In [ ]:
import cv2 
X = []
Y = []
for cls in classes:
    pth = 'D:/Dataset/Train/'+cls
    for j in os.listdir(pth):
        img = cv2.imread(pth+'/'+j,0)
        img = cv2.resize(img, (1028,1028))
        X.append(img)
        Y.append(classes[cls])
In [ ]:
np.unique(Y)
Out[ ]:
array([0, 1])
In [ ]:
X = np.array(X)
Y = np.array(Y)
In [ ]:
pd.Series(Y).value_counts()
Out[ ]:
1    290
0    251
dtype: int64
In [ ]:
X.shape
Out[ ]:
(541, 1028, 1028)
In [ ]:
#Visualize Data 
plt.imshow(X[0], cmap='gray')
Out[ ]:
<matplotlib.image.AxesImage at 0x17d5d7e8e10>
In [ ]:
#Prepare Data
X_updated = X.reshape(len(X), -1)
X_updated.shape
Out[ ]:
(541, 1056784)
In [ ]:
#split Data
xtrain, xtest, ytrain, ytest = train_test_split(X_updated, Y, random_state=10,test_size=.20)
In [ ]:
xtrain.shape, xtest.shape
Out[ ]:
((432, 1056784), (109, 1056784))
In [ ]:
#Feature Scaling 
print(xtrain.max(), xtrain.min())
print(xtest.max(), xtest.min())
xtrain = xtrain/255
xtest = xtest/255
print(xtrain.max(), xtrain.min())
print(xtest.max(), xtest.min())
255 5
255 7
1.0 0.0196078431372549
1.0 0.027450980392156862
In [ ]:
#Fearute selection: PCA
from sklearn.decomposition import PCA
In [ ]:
print(xtrain.shape, xtest.shape)
pca = PCA(.98)
# pca_train = pca.fit_transform(xtrain)
# pca_test = pca.transform(xtest)
pca_train = xtrain
pca_test = xtest
(432, 1056784) (109, 1056784)
In [ ]:
# print(pca_train.shape, pca_test.shape)
# print(pca.n_components_)
# print(pca.n_features_)
In [ ]:
###TRAIN MODEL
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
In [ ]:
import warnings
warnings.filterwarnings('ignore')

lg = LogisticRegression(C=0.1) #Penality Parameter
lg.fit(pca_train, ytrain)
Out[ ]:
LogisticRegression(C=0.1)
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
LogisticRegression(C=0.1)
In [ ]:
sv = SVC()
sv.fit(pca_train, ytrain)
Out[ ]:
SVC()
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
SVC()
In [ ]:
#Evaluation
print("Training Score :", lg.score(pca_train, ytrain))
print("Testing Score:", lg.score(pca_test, ytest))
Training Score : 1.0
Testing Score: 0.9724770642201835
In [ ]:
print("Training Score :", sv.score(pca_train, ytrain))
print("Testing Score:", sv.score(pca_test, ytest))
Training Score : 1.0
Testing Score: 0.9724770642201835
In [ ]:
#Prediction
pred = sv.predict(pca_test)
np.where(ytest!=pred)
Out[ ]:
(array([ 22,  59, 102], dtype=int64),)
In [ ]:
pred[36]
Out[ ]:
1
In [ ]:
ytest[36]
Out[ ]:
1
In [ ]:
#TEST MODEL
dec = {0:'cancer', 1:'Non-Cancer'}
In [ ]:
plt.figure(figsize=(12,8))
p = os.listdir('D:/Dataset/test/')
c=1
for i in os.listdir('D:/Dataset/test/cancer/')[:8]:
    plt.subplot(3,3,c)

    img = cv2.imread('D:/Dataset/test/cancer/'+i,0)
    img1 = cv2.resize(img, (1028,1028))
    img1 = img1.reshape(1,-1)/255
    p = sv.predict(img1)
    plt.title(dec[p[0]])
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    c+=1
In [ ]:
plt.figure(figsize=(12,8))
p = os.listdir('D:/Dataset/test/')
c=1
for i in os.listdir('D:/Dataset/test/Non-Cancer/')[:6]:
    plt.subplot(3,3,c)

    img = cv2.imread('D:/Dataset/test/Non-Cancer/'+i,0)
    img1 = cv2.resize(img, (1028,1028))
    img1 = img1.reshape(1,-1)/255
    p = sv.predict(img1)
    plt.title(dec[p[0]])
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    c+=1
In [ ]:
BASE_PATH = 'D:/Dataset/Train/'
unique_classes = []
for path in os.listdir(BASE_PATH):
    unique_classes.append(path)
print(unique_classes)
['Training cancer', 'Training Non-Cancer']
In [ ]:
class_index = [unique_classes[1], unique_classes[0]]
for c in class_index:
    print(c, "-", class_index.index(c))
Training Non-Cancer - 0
Training cancer - 1
In [ ]:
images = []
masks = []
labels = []
for folder in os.listdir(BASE_PATH):
    class_path = os.path.join(BASE_PATH, folder)
    for img in os.listdir(class_path):
        if "_mask" not in img:
            img_path = os.path.join(class_path, img)
            msk_path = img_path.replace(".png", "_mask.png")
            # check if mask exist
            if os.path.exists(msk_path):
                images.append(img_path)
                masks.append(msk_path)
                labels.append(folder)
In [ ]:
print(len(images))
541
In [ ]:
images[0]
Out[ ]:
'D:/Dataset/Train/Training cancer\\1056.jpg'
In [ ]:
input_images_size = 256
channel = 1
In [ ]:
import cv2
import scipy
import scipy.ndimage


def load_image(img_path):
    """ Load single image as Grayscale
    """
    # load image as grayscale
    img = cv2.imread(img_path, 0)
    return img

def padding(img, msk):
    """ Pad images to make them square
    """
    size = np.max(img.shape)

    offset_x = (size-img.shape[0])//2
    offset_y = (size-img.shape[1])//2

    blank_image = np.zeros((size, size))
    blank_mask = np.zeros((size, size))

    blank_image[offset_x:offset_x+img.shape[0],
               offset_y:offset_y+img.shape[1]] = img
    blank_mask[offset_x:offset_x+img.shape[0],
               offset_y:offset_y+img.shape[1]] = msk
    return blank_image, blank_mask

def resize_mask(mask):
    """Resize mask, its different because mask pixel value can change because of resize
    """
    new_size = np.array([input_images_size, input_images_size]) / mask.shape
    mask = scipy.ndimage.interpolation.zoom(mask, new_size)
    return mask

def resize(img):
    """Resize image
    """
    img = cv2.resize(img, (input_images_size, input_images_size))
    return img
        
def preprocess(img):
    """Image preprocessing
    Normalize image
    """
    img = img/255.0
    return img

def inverse_preprocess(img):
    """Inverse of preprocessing
    """
    img = img*255
    return img

def load_data(img_path, msk_path, label):
    """Load image, mask and repalce mask value with class index
    0 = normal
    1 = benign
    2 = malignant
    """
    img = load_image(img_path)
    msk = load_image(msk_path)
    img, msk = padding(img, msk)
    label_indx = class_index.index(label)
    msk[msk == 255] = 1
    msk = msk.astype("uint8")
    img = resize(img)
    msk = resize_mask(msk)
    new_mask = np.zeros((input_images_size, input_images_size, 2))
    if label_indx != 0:
        new_mask[:, :, label_indx-1] = msk
#     print(np.unique(msk), label, label_indx)
    return img, new_mask

def load_batch(images, masks, labels):
    """Load Batch of data
    """
    batch_x = []
    batch_y = []
    for i, m, l in zip(images, masks, labels):
        img, msk = load_data(i, m, l)
        img = preprocess(img)
        batch_x.append(img)
        batch_y.append(msk)
    return np.array(batch_x), np.array(batch_y) 
In [ ]:
import matplotlib.pyplot as plt
for i in [0, 500, 600]:
    indx = i
    img, msk = load_data(images[indx], masks[indx], labels[indx])
    print(np.min(img), np.max(img), img.shape)
    plt.figure(figsize=(20, 20))
    plt.subplot(1, 3, 1)
    plt.axis("off")
    plt.imshow(img)
    plt.subplot(1, 3, 2)
    plt.axis("off")
    plt.imshow(msk[:, :, 0])
    plt.subplot(1, 3, 3)
    plt.axis("off")
    plt.imshow(msk[:, :, 1])
    plt.show()
16.55059814453125 217.49298095703125 (256, 256)
21.66058349609375 251.78179931640625 (256, 256)
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
Cell In[52], line 4
      2 for i in [0, 500, 600]:
      3     indx = i
----> 4     img, msk = load_data(images[indx], masks[indx], labels[indx])
      5     print(np.min(img), np.max(img), img.shape)
      6     plt.figure(figsize=(20, 20))

IndexError: list index out of range
In [ ]:
images = np.array(images)
masks = np.array(masks)
labels = np.array(labels)
In [ ]:
!pip install segmentation_models
Requirement already satisfied: segmentation_models in c:\python311\lib\site-packages (1.0.1)
Requirement already satisfied: keras-applications<=1.0.8,>=1.0.7 in c:\python311\lib\site-packages (from segmentation_models) (1.0.8)
Requirement already satisfied: image-classifiers==1.0.0 in c:\python311\lib\site-packages (from segmentation_models) (1.0.0)
Requirement already satisfied: efficientnet==1.0.0 in c:\python311\lib\site-packages (from segmentation_models) (1.0.0)
Requirement already satisfied: scikit-image in c:\python311\lib\site-packages (from efficientnet==1.0.0->segmentation_models) (0.20.0)
Requirement already satisfied: numpy>=1.9.1 in c:\python311\lib\site-packages (from keras-applications<=1.0.8,>=1.0.7->segmentation_models) (1.23.5)
Requirement already satisfied: h5py in c:\users\hp\appdata\roaming\python\python311\site-packages (from keras-applications<=1.0.8,>=1.0.7->segmentation_models) (3.8.0)
Requirement already satisfied: scipy>=1.8 in c:\python311\lib\site-packages (from scikit-image->efficientnet==1.0.0->segmentation_models) (1.10.1)
Requirement already satisfied: networkx>=2.8 in c:\python311\lib\site-packages (from scikit-image->efficientnet==1.0.0->segmentation_models) (3.1)
Requirement already satisfied: pillow>=9.0.1 in c:\python311\lib\site-packages (from scikit-image->efficientnet==1.0.0->segmentation_models) (9.4.0)
Requirement already satisfied: imageio>=2.4.1 in c:\python311\lib\site-packages (from scikit-image->efficientnet==1.0.0->segmentation_models) (2.28.1)
Requirement already satisfied: tifffile>=2019.7.26 in c:\python311\lib\site-packages (from scikit-image->efficientnet==1.0.0->segmentation_models) (2023.4.12)
Requirement already satisfied: PyWavelets>=1.1.1 in c:\python311\lib\site-packages (from scikit-image->efficientnet==1.0.0->segmentation_models) (1.4.1)
Requirement already satisfied: packaging>=20.0 in c:\python311\lib\site-packages (from scikit-image->efficientnet==1.0.0->segmentation_models) (23.0)
Requirement already satisfied: lazy_loader>=0.1 in c:\python311\lib\site-packages (from scikit-image->efficientnet==1.0.0->segmentation_models) (0.2)
In [ ]:
pip install upgrade keras
Note: you may need to restart the kernel to use updated packages.
ERROR: Could not find a version that satisfies the requirement upgrade (from versions: none)
ERROR: No matching distribution found for upgrade
In [ ]:
import os
os.environ["SM_FRAMEWORK"] = "tf.keras"
from tensorflow import keras 
import segmentation_models as sm
import tensorflow as tf

sm.framework()

BACKBONE = 'resnet34'
LR = 0.00001
model = sm.Unet(BACKBONE, classes=2, activation="sigmoid",input_shape=(input_images_size,input_images_size, channel),encoder_weights=None)

optim = tf.keras.optimizers.Adam(LR)

dice_loss = sm.losses.DiceLoss()
focal_loss = sm.losses.BinaryFocalLoss()
total_loss = dice_loss + (1 * focal_loss)

metrics = [sm.metrics.IOUScore(threshold=0.5),
           sm.metrics.FScore(threshold=0.5)]

model.compile(optim, total_loss, metrics)
Segmentation Models: using `tf.keras` framework.
In [ ]:
batch_size = 4
history = {"epoch": []}
for e in range(100):
    print("epoch:",e, end=" > ")
    indexes = list(range(len(images)))
    temp_history = {"loss": [],
                   "IOU": [],
                   "F-Score": []}
    for b in range(0, len(images), batch_size):
        bs = b
        be = bs+batch_size
        batch_index = indexes[bs:be]
        batch_x, batch_y = load_batch(images[batch_index], masks[batch_index], labels[batch_index])
        batch_x = np.expand_dims(batch_x, axis=-1)
        batch_y = np.expand_dims(batch_y, axis=-1)
        batch_y = batch_y.astype("float32")
        loss = model.train_on_batch(batch_x, batch_y)
        temp_history["loss"].append(loss[0])
        temp_history["IOU"].append(loss[1])
        temp_history["F-Score"].append(loss[2])
    print("loss", np.round(np.mean(temp_history["loss"]), 4),"IOU", np.round(np.mean(temp_history["IOU"]), 4),"F-Score", np.round(np.mean(temp_history["F-Score"]), 4))
    
    history["epoch"].append(temp_history)
    
model.save_weights("cancer")
epoch: 0 > loss 0.814 IOU 0.1356 F-Score 0.1698
epoch: 1 > loss -1.2409 IOU 0.2723 F-Score 0.2485
epoch: 2 > loss -4.4345 IOU 0.3412 F-Score 0.2755
epoch: 3 > loss -8.0726 IOU 0.2924 F-Score 0.258
epoch: 4 > loss -11.4899 IOU 0.2444 F-Score 0.2374
epoch: 5 > 
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
Cell In[66], line 17
     15 batch_y = np.expand_dims(batch_y, axis=-1)
     16 batch_y = batch_y.astype("float32")
---> 17 loss = model.train_on_batch(batch_x, batch_y)
     18 temp_history["loss"].append(loss[0])
     19 temp_history["IOU"].append(loss[1])

File c:\Python311\Lib\site-packages\keras\engine\training.py:2510, in Model.train_on_batch(self, x, y, sample_weight, class_weight, reset_metrics, return_dict)
   2506     iterator = data_adapter.single_batch_iterator(
   2507         self.distribute_strategy, x, y, sample_weight, class_weight
   2508     )
   2509     self.train_function = self.make_train_function()
-> 2510     logs = self.train_function(iterator)
   2512 logs = tf_utils.sync_to_numpy_or_python_type(logs)
   2513 if return_dict:

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\util\traceback_utils.py:150, in filter_traceback.<locals>.error_handler(*args, **kwargs)
    148 filtered_tb = None
    149 try:
--> 150   return fn(*args, **kwargs)
    151 except Exception as e:
    152   filtered_tb = _process_traceback_frames(e.__traceback__)

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\eager\polymorphic_function\polymorphic_function.py:894, in Function.__call__(self, *args, **kwds)
    891 compiler = "xla" if self._jit_compile else "nonXla"
    893 with OptionalXlaContext(self._jit_compile):
--> 894   result = self._call(*args, **kwds)
    896 new_tracing_count = self.experimental_get_tracing_count()
    897 without_tracing = (tracing_count == new_tracing_count)

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\eager\polymorphic_function\polymorphic_function.py:926, in Function._call(self, *args, **kwds)
    923   self._lock.release()
    924   # In this case we have created variables on the first call, so we run the
    925   # defunned version which is guaranteed to never create variables.
--> 926   return self._no_variable_creation_fn(*args, **kwds)  # pylint: disable=not-callable
    927 elif self._variable_creation_fn is not None:
    928   # Release the lock early so that multiple threads can perform the call
    929   # in parallel.
    930   self._lock.release()

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\eager\polymorphic_function\tracing_compiler.py:143, in TracingCompiler.__call__(self, *args, **kwargs)
    140 with self._lock:
    141   (concrete_function,
    142    filtered_flat_args) = self._maybe_define_function(args, kwargs)
--> 143 return concrete_function._call_flat(
    144     filtered_flat_args, captured_inputs=concrete_function.captured_inputs)

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\eager\polymorphic_function\monomorphic_function.py:1757, in ConcreteFunction._call_flat(self, args, captured_inputs, cancellation_manager)
   1753 possible_gradient_type = gradients_util.PossibleTapeGradientTypes(args)
   1754 if (possible_gradient_type == gradients_util.POSSIBLE_GRADIENT_TYPES_NONE
   1755     and executing_eagerly):
   1756   # No tape is watching; skip to running the function.
-> 1757   return self._build_call_outputs(self._inference_function.call(
   1758       ctx, args, cancellation_manager=cancellation_manager))
   1759 forward_backward = self._select_forward_and_backward_functions(
   1760     args,
   1761     possible_gradient_type,
   1762     executing_eagerly)
   1763 forward_function, args_with_tangents = forward_backward.forward()

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\eager\polymorphic_function\monomorphic_function.py:381, in _EagerDefinedFunction.call(self, ctx, args, cancellation_manager)
    379 with _InterpolateFunctionError(self):
    380   if cancellation_manager is None:
--> 381     outputs = execute.execute(
    382         str(self.signature.name),
    383         num_outputs=self._num_outputs,
    384         inputs=args,
    385         attrs=attrs,
    386         ctx=ctx)
    387   else:
    388     outputs = execute.execute_with_cancellation(
    389         str(self.signature.name),
    390         num_outputs=self._num_outputs,
   (...)
    393         ctx=ctx,
    394         cancellation_manager=cancellation_manager)

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\eager\execute.py:52, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     50 try:
     51   ctx.ensure_initialized()
---> 52   tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
     53                                       inputs, attrs, num_outputs)
     54 except core._NotOkStatusException as e:
     55   if name is not None:

KeyboardInterrupt: 
In [ ]:
import matplotlib.pyplot as plt
for i in [0, 500, 600]:
    indx = i
    img, msk = load_data(images[indx], masks[indx], labels[indx])
    print(np.min(img), np.max(img), img.shape)
    print(img.shape)
    
    img2 = preprocess(img)
    pred = model.predict(np.array([img2]))
    pred = pred[0]

    plt.figure(figsize=(20, 20))
    plt.subplot(1, 3, 1)
    plt.axis("off")
    plt.imshow(img)
    plt.subplot(1, 3, 2)
    plt.axis("off")
    plt.imshow(pred[:, :, 0])
    plt.subplot(1, 3, 3)
    plt.axis("off")
    plt.imshow(pred[:, :, 1])
    plt.show()
16.55059814453125 217.49298095703125 (256, 256)
(256, 256)
1/1 [==============================] - 1s 1s/step
21.66058349609375 251.78179931640625 (256, 256)
(256, 256)
1/1 [==============================] - 0s 160ms/step
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
Cell In[67], line 4
      2 for i in [0, 500, 600]:
      3     indx = i
----> 4     img, msk = load_data(images[indx], masks[indx], labels[indx])
      5     print(np.min(img), np.max(img), img.shape)
      6     print(img.shape)

IndexError: index 600 is out of bounds for axis 0 with size 541
In [ ]:
from keras.models import load_model  # TensorFlow is required for Keras to work
from PIL import Image, ImageOps  # Install pillow instead of PIL
import numpy as np

# Disable scientific notation for clarity
np.set_printoptions(suppress=True)

# Load the model
model = load_model("D:/Dataset/converted_keras/keras_model.h5", compile=False)

# Load the labels
class_names = open("D:/Dataset/converted_keras/labels.txt", "r").readlines()

# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

# Replace this with the path to your image ... PATH...PATH..PATH
image = Image.open("D:/Dataset/test/Non-Cancer/6.png").convert("RGB")

# resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)
image = ImageOps.fit(image, size, Image.Resampling.LANCZOS)

# turn the image into a numpy array
image_array = np.asarray(image)

# Normalize the image
normalized_image_array = (image_array.astype(np.float32) / 127.5) - 1

# Load the image into the array
data[0] = normalized_image_array

# Predicts the model
prediction = model.predict(data)
index = np.argmax(prediction)
class_name = class_names[index]
confidence_score = prediction[0][index]

# Print prediction and confidence score
print("Class:", class_name[2:], end="")
print("Confidence Score:", confidence_score)
1/1 [==============================] - 1s 1s/step
Class: cancer
Confidence Score: 0.9686299